/* $NetBSD: evthread.c,v 1.1.1.1 2013/04/11 16:43:25 christos Exp $ */
/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: evthread.c,v 1.1.1.1 2013/04/11 16:43:25 christos Exp $");

#ifndef _EVENT_DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif

/* Nonzero iff lock debugging is turned on: every lock handed out is a
 * debug_lock wrapper that asserts correct usage. */
GLOBAL int _evthread_lock_debugging_enabled = 0;
52 GLOBAL
struct evthread_lock_callbacks _evthread_lock_fns
= {
53 0, 0, NULL
, NULL
, NULL
, NULL
55 GLOBAL
unsigned long (*_evthread_id_fn
)(void) = NULL
;
56 GLOBAL
struct evthread_condition_callbacks _evthread_cond_fns
= {
57 0, NULL
, NULL
, NULL
, NULL
60 /* Used for debugging */
61 static struct evthread_lock_callbacks _original_lock_fns
= {
62 0, 0, NULL
, NULL
, NULL
, NULL
64 static struct evthread_condition_callbacks _original_cond_fns
= {
65 0, NULL
, NULL
, NULL
, NULL
69 evthread_set_id_callback(unsigned long (*id_fn
)(void))
71 _evthread_id_fn
= id_fn
;
75 evthread_set_lock_callbacks(const struct evthread_lock_callbacks
*cbs
)
77 struct evthread_lock_callbacks
*target
=
78 _evthread_lock_debugging_enabled
79 ? &_original_lock_fns
: &_evthread_lock_fns
;
83 event_warnx("Trying to disable lock functions after "
84 "they have been set up will probaby not work.");
85 memset(target
, 0, sizeof(_evthread_lock_fns
));
89 /* Uh oh; we already had locking callbacks set up.*/
90 if (target
->lock_api_version
== cbs
->lock_api_version
&&
91 target
->supported_locktypes
== cbs
->supported_locktypes
&&
92 target
->alloc
== cbs
->alloc
&&
93 target
->free
== cbs
->free
&&
94 target
->lock
== cbs
->lock
&&
95 target
->unlock
== cbs
->unlock
) {
96 /* no change -- allow this. */
99 event_warnx("Can't change lock callbacks once they have been "
103 if (cbs
->alloc
&& cbs
->free
&& cbs
->lock
&& cbs
->unlock
) {
104 memcpy(target
, cbs
, sizeof(_evthread_lock_fns
));
105 return event_global_setup_locks_(1);
112 evthread_set_condition_callbacks(const struct evthread_condition_callbacks
*cbs
)
114 struct evthread_condition_callbacks
*target
=
115 _evthread_lock_debugging_enabled
116 ? &_original_cond_fns
: &_evthread_cond_fns
;
119 if (target
->alloc_condition
)
120 event_warnx("Trying to disable condition functions "
121 "after they have been set up will probaby not "
123 memset(target
, 0, sizeof(_evthread_cond_fns
));
126 if (target
->alloc_condition
) {
127 /* Uh oh; we already had condition callbacks set up.*/
128 if (target
->condition_api_version
== cbs
->condition_api_version
&&
129 target
->alloc_condition
== cbs
->alloc_condition
&&
130 target
->free_condition
== cbs
->free_condition
&&
131 target
->signal_condition
== cbs
->signal_condition
&&
132 target
->wait_condition
== cbs
->wait_condition
) {
133 /* no change -- allow this. */
136 event_warnx("Can't change condition callbacks once they "
137 "have been initialized.");
140 if (cbs
->alloc_condition
&& cbs
->free_condition
&&
141 cbs
->signal_condition
&& cbs
->wait_condition
) {
142 memcpy(target
, cbs
, sizeof(_evthread_cond_fns
));
144 if (_evthread_lock_debugging_enabled
) {
145 _evthread_cond_fns
.alloc_condition
= cbs
->alloc_condition
;
146 _evthread_cond_fns
.free_condition
= cbs
->free_condition
;
147 _evthread_cond_fns
.signal_condition
= cbs
->signal_condition
;
/* Wrapper around a real lock, used while lock debugging is enabled to
 * track ownership and recursion depth. */
struct debug_lock {
	unsigned locktype;	/* EVTHREAD_LOCKTYPE_* flags for this lock */
	unsigned long held_by;	/* id of the thread holding it, or 0 */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;		/* recursion depth; 0 when unheld */
	void *lock;		/* the underlying real lock, if any */
};
162 debug_lock_alloc(unsigned locktype
)
164 struct debug_lock
*result
= mm_malloc(sizeof(struct debug_lock
));
167 if (_original_lock_fns
.alloc
) {
168 if (!(result
->lock
= _original_lock_fns
.alloc(
169 locktype
|EVTHREAD_LOCKTYPE_RECURSIVE
))) {
176 result
->locktype
= locktype
;
183 debug_lock_free(void *lock_
, unsigned locktype
)
185 struct debug_lock
*lock
= lock_
;
186 EVUTIL_ASSERT(lock
->count
== 0);
187 EVUTIL_ASSERT(locktype
== lock
->locktype
);
188 if (_original_lock_fns
.free
) {
189 _original_lock_fns
.free(lock
->lock
,
190 lock
->locktype
|EVTHREAD_LOCKTYPE_RECURSIVE
);
198 evthread_debug_lock_mark_locked(unsigned mode
, struct debug_lock
*lock
)
201 if (!(lock
->locktype
& EVTHREAD_LOCKTYPE_RECURSIVE
))
202 EVUTIL_ASSERT(lock
->count
== 1);
203 if (_evthread_id_fn
) {
205 me
= _evthread_id_fn();
207 EVUTIL_ASSERT(lock
->held_by
== me
);
213 debug_lock_lock(unsigned mode
, void *lock_
)
215 struct debug_lock
*lock
= lock_
;
217 if (lock
->locktype
& EVTHREAD_LOCKTYPE_READWRITE
)
218 EVUTIL_ASSERT(mode
& (EVTHREAD_READ
|EVTHREAD_WRITE
));
220 EVUTIL_ASSERT((mode
& (EVTHREAD_READ
|EVTHREAD_WRITE
)) == 0);
221 if (_original_lock_fns
.lock
)
222 res
= _original_lock_fns
.lock(mode
, lock
->lock
);
224 evthread_debug_lock_mark_locked(mode
, lock
);
230 evthread_debug_lock_mark_unlocked(unsigned mode
, struct debug_lock
*lock
)
232 if (lock
->locktype
& EVTHREAD_LOCKTYPE_READWRITE
)
233 EVUTIL_ASSERT(mode
& (EVTHREAD_READ
|EVTHREAD_WRITE
));
235 EVUTIL_ASSERT((mode
& (EVTHREAD_READ
|EVTHREAD_WRITE
)) == 0);
236 if (_evthread_id_fn
) {
237 EVUTIL_ASSERT(lock
->held_by
== _evthread_id_fn());
238 if (lock
->count
== 1)
242 EVUTIL_ASSERT(lock
->count
>= 0);
246 debug_lock_unlock(unsigned mode
, void *lock_
)
248 struct debug_lock
*lock
= lock_
;
250 evthread_debug_lock_mark_unlocked(mode
, lock
);
251 if (_original_lock_fns
.unlock
)
252 res
= _original_lock_fns
.unlock(mode
, lock
->lock
);
257 debug_cond_wait(void *_cond
, void *_lock
, const struct timeval
*tv
)
260 struct debug_lock
*lock
= _lock
;
262 EVLOCK_ASSERT_LOCKED(_lock
);
263 evthread_debug_lock_mark_unlocked(0, lock
);
264 r
= _original_cond_fns
.wait_condition(_cond
, lock
->lock
, tv
);
265 evthread_debug_lock_mark_locked(0, lock
);
270 evthread_enable_lock_debuging(void)
272 struct evthread_lock_callbacks cbs
= {
273 EVTHREAD_LOCK_API_VERSION
,
274 EVTHREAD_LOCKTYPE_RECURSIVE
,
280 if (_evthread_lock_debugging_enabled
)
282 memcpy(&_original_lock_fns
, &_evthread_lock_fns
,
283 sizeof(struct evthread_lock_callbacks
));
284 memcpy(&_evthread_lock_fns
, &cbs
,
285 sizeof(struct evthread_lock_callbacks
));
287 memcpy(&_original_cond_fns
, &_evthread_cond_fns
,
288 sizeof(struct evthread_condition_callbacks
));
289 _evthread_cond_fns
.wait_condition
= debug_cond_wait
;
290 _evthread_lock_debugging_enabled
= 1;
292 /* XXX return value should get checked. */
293 event_global_setup_locks_(0);
297 _evthread_is_debug_lock_held(void *lock_
)
299 struct debug_lock
*lock
= lock_
;
302 if (_evthread_id_fn
) {
303 unsigned long me
= _evthread_id_fn();
304 if (lock
->held_by
!= me
)
311 _evthread_debug_get_real_lock(void *lock_
)
313 struct debug_lock
*lock
= lock_
;
318 evthread_setup_global_lock_(void *lock_
, unsigned locktype
, int enable_locks
)
320 /* there are four cases here:
321 1) we're turning on debugging; locking is not on.
322 2) we're turning on debugging; locking is on.
323 3) we're turning on locking; debugging is not on.
324 4) we're turning on locking; debugging is on. */
326 if (!enable_locks
&& _original_lock_fns
.alloc
== NULL
) {
327 /* Case 1: allocate a debug lock. */
328 EVUTIL_ASSERT(lock_
== NULL
);
329 return debug_lock_alloc(locktype
);
330 } else if (!enable_locks
&& _original_lock_fns
.alloc
!= NULL
) {
331 /* Case 2: wrap the lock in a debug lock. */
332 struct debug_lock
*lock
;
333 EVUTIL_ASSERT(lock_
!= NULL
);
335 if (!(locktype
& EVTHREAD_LOCKTYPE_RECURSIVE
)) {
336 /* We can't wrap it: We need a recursive lock */
337 _original_lock_fns
.free(lock_
, locktype
);
338 return debug_lock_alloc(locktype
);
340 lock
= mm_malloc(sizeof(struct debug_lock
));
342 _original_lock_fns
.free(lock_
, locktype
);
346 lock
->locktype
= locktype
;
350 } else if (enable_locks
&& ! _evthread_lock_debugging_enabled
) {
351 /* Case 3: allocate a regular lock */
352 EVUTIL_ASSERT(lock_
== NULL
);
353 return _evthread_lock_fns
.alloc(locktype
);
355 /* Case 4: Fill in a debug lock with a real lock */
356 struct debug_lock
*lock
= lock_
;
357 EVUTIL_ASSERT(enable_locks
&&
358 _evthread_lock_debugging_enabled
);
359 EVUTIL_ASSERT(lock
->locktype
== locktype
);
360 EVUTIL_ASSERT(lock
->lock
== NULL
);
361 lock
->lock
= _original_lock_fns
.alloc(
362 locktype
|EVTHREAD_LOCKTYPE_RECURSIVE
);
373 #ifndef EVTHREAD_EXPOSE_STRUCTS
375 _evthreadimpl_get_id()
377 return _evthread_id_fn
? _evthread_id_fn() : 1;
380 _evthreadimpl_lock_alloc(unsigned locktype
)
382 return _evthread_lock_fns
.alloc
?
383 _evthread_lock_fns
.alloc(locktype
) : NULL
;
386 _evthreadimpl_lock_free(void *lock
, unsigned locktype
)
388 if (_evthread_lock_fns
.free
)
389 _evthread_lock_fns
.free(lock
, locktype
);
392 _evthreadimpl_lock_lock(unsigned mode
, void *lock
)
394 if (_evthread_lock_fns
.lock
)
395 return _evthread_lock_fns
.lock(mode
, lock
);
400 _evthreadimpl_lock_unlock(unsigned mode
, void *lock
)
402 if (_evthread_lock_fns
.unlock
)
403 return _evthread_lock_fns
.unlock(mode
, lock
);
408 _evthreadimpl_cond_alloc(unsigned condtype
)
410 return _evthread_cond_fns
.alloc_condition
?
411 _evthread_cond_fns
.alloc_condition(condtype
) : NULL
;
414 _evthreadimpl_cond_free(void *cond
)
416 if (_evthread_cond_fns
.free_condition
)
417 _evthread_cond_fns
.free_condition(cond
);
420 _evthreadimpl_cond_signal(void *cond
, int broadcast
)
422 if (_evthread_cond_fns
.signal_condition
)
423 return _evthread_cond_fns
.signal_condition(cond
, broadcast
);
428 _evthreadimpl_cond_wait(void *cond
, void *lock
, const struct timeval
*tv
)
430 if (_evthread_cond_fns
.wait_condition
)
431 return _evthread_cond_fns
.wait_condition(cond
, lock
, tv
);
436 _evthreadimpl_is_lock_debugging_enabled(void)
438 return _evthread_lock_debugging_enabled
;
442 _evthreadimpl_locking_enabled(void)
444 return _evthread_lock_fns
.lock
!= NULL
;