/*
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H


#include <OS.h>
#include <debug.h>

#include <arch/atomic.h>


struct mutex_waiter;

typedef struct mutex {
	const char*				name;
	struct mutex_waiter*	waiters;
	spinlock				lock;
#if KDEBUG
	thread_id				holder;
#else
	int32					count;
	uint16					ignore_unlock_count;
#endif
	uint8					flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME	0x1


typedef struct recursive_lock {
	mutex		lock;
#if !KDEBUG
	thread_id	holder;
#endif
	int			recursion;
} recursive_lock;


struct rw_lock_waiter;

typedef struct rw_lock {
	const char*				name;
	struct rw_lock_waiter*	waiters;
	spinlock				lock;
	thread_id				holder;
	int32					count;
	int32					owner_count;
	int16					active_readers;
								// Only > 0 while a writer is waiting: number
								// of active readers when the first waiting
								// writer started waiting.
	int16					pending_readers;
								// Number of readers that have already
								// incremented "count", but have not yet started
								// to wait at the time the last writer unlocked.
	uint32					flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE	0x10000

#define RW_LOCK_FLAG_CLONE_NAME	0x1
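
// Note on "count" (inferred from the inline read-lock fast path below and the
// name of RW_LOCK_WRITER_COUNT_BASE): each reader adds 1 to "count", while a
// writer raises it by RW_LOCK_WRITER_COUNT_BASE, so a reader only needs to
// take the slow path when count >= RW_LOCK_WRITER_COUNT_BASE, i.e. when a
// writer is active or waiting.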


#if KDEBUG
#	define KDEBUG_RW_LOCK_DEBUG 0
		// Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
		// The rw_lock will just behave like a recursive locker then.
#	define ASSERT_LOCKED_RECURSIVE(r) \
		{ ASSERT(find_thread(NULL) == (r)->lock.holder); }
#	define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#	define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
		{ ASSERT(find_thread(NULL) == (l)->holder); }
#	if KDEBUG_RW_LOCK_DEBUG
#		define ASSERT_READ_LOCKED_RW_LOCK(l) \
			{ ASSERT(find_thread(NULL) == (l)->holder); }
#	else
#		define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#	endif
#else
#	define ASSERT_LOCKED_RECURSIVE(r)		do {} while (false)
#	define ASSERT_LOCKED_MUTEX(m)			do {} while (false)
#	define ASSERT_WRITE_LOCKED_RW_LOCK(m)	do {} while (false)
#	define ASSERT_READ_LOCKED_RW_LOCK(l)	do {} while (false)
#endif
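
// Usage sketch (illustrative; sExampleLock is a made-up name): a function that
// requires its caller to hold a lock can document and check that, e.g.
//
//	static void
//	example_locked(void)
//	{
//		ASSERT_LOCKED_MUTEX(&sExampleLock);
//		// ...
//	}
//
// With KDEBUG disabled all of these asserts compile to no-ops.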


// static initializers
#if KDEBUG
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), 0 }
#else
#	define MUTEX_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, 0, 0, 0 }
#	define RECURSIVE_LOCK_INITIALIZER(name)	{ MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name) \
	{ name, NULL, B_SPINLOCK_INITIALIZER, -1, 0, 0, 0 }
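
// Usage sketch (illustrative; the variable and its debug name are made up):
// the initializers allow locks to be defined statically, without an init call:
//
//	static mutex sExampleLock = MUTEX_INITIALIZER("example lock");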


#if KDEBUG
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->lock.holder)
#else
#	define RECURSIVE_LOCK_HOLDER(recursiveLock)	((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void recursive_lock_init(recursive_lock *lock, const char *name);
	// name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
	uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
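
// Usage sketch (illustrative; names are made up): a recursive_lock may be
// re-acquired by the thread that already holds it:
//
//	static recursive_lock sExampleLock
//		= RECURSIVE_LOCK_INITIALIZER("example recursive lock");
//
//	recursive_lock_lock(&sExampleLock);
//	recursive_lock_lock(&sExampleLock);	// same thread, recursion level 2
//	recursive_lock_unlock(&sExampleLock);
//	recursive_lock_unlock(&sExampleLock);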

extern void rw_lock_init(rw_lock* lock, const char* name);
	// name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);
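
// Usage sketch (illustrative; names are made up; the read-lock functions are
// the inline wrappers defined further below):
//
//	static rw_lock sExampleLock = RW_LOCK_INITIALIZER("example rw lock");
//
//	rw_lock_read_lock(&sExampleLock);
//	// ... read shared state ...
//	rw_lock_read_unlock(&sExampleLock);
//
//	rw_lock_write_lock(&sExampleLock);
//	// ... modify shared state ...
//	rw_lock_write_unlock(&sExampleLock);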

extern void mutex_init(mutex* lock, const char* name);
	// name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
	// Unlocks "from" and locks "to" such that unlocking and starting to wait
	// for the lock happen atomically. I.e. if "from" guards the object "to"
	// belongs to, the operation is safe as long as "from" is held while
	// destroying "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
	// Like mutex_switch_lock(), just for switching from a read-locked
	// rw_lock.
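
// Usage sketch for mutex_switch_lock() (illustrative; the list/entry names are
// made up): a thread holding a list lock can start waiting on an entry's lock
// without a window in which the entry could be destroyed, provided destroyers
// hold the list lock while removing the entry:
//
//	mutex_lock(&sListLock);
//	Entry* entry = lookup_entry();
//	status_t error = mutex_switch_lock(&sListLock, &entry->lock);
//		// sListLock is now unlocked; on B_OK, entry->lock is held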


// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
	uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock);
extern void _rw_lock_write_unlock(rw_lock* lock);

extern status_t _mutex_lock(mutex* lock, void* locker);
extern void _mutex_unlock(mutex* lock);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
	bigtime_t timeout);


static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	return rw_lock_write_lock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock(lock);
	return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
	bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
	return mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
	rw_lock_write_unlock(lock);
#else
	int32 oldCount = atomic_add(&lock->count, -1);
	if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
		_rw_lock_read_unlock(lock);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
	_rw_lock_write_unlock(lock);
}


static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
	return _mutex_lock(lock, NULL);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock(lock, NULL);
	return B_OK;
#endif
}


static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
	return _mutex_trylock(lock);
#else
	if (atomic_test_and_set(&lock->count, -1, 0) != 0)
		return B_WOULD_BLOCK;
	return B_OK;
#endif
}
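
// Usage sketch (illustrative): mutex_trylock() never blocks; it either
// acquires the lock (B_OK) or fails with B_WOULD_BLOCK:
//
//	if (mutex_trylock(&sExampleLock) == B_OK) {
//		// ... fast path, lock held ...
//		mutex_unlock(&sExampleLock);
//	}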


static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
	if (atomic_add(&lock->count, -1) < 0)
		return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
	return B_OK;
#endif
}
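
// Usage sketch (illustrative; assumes the standard B_RELATIVE_TIMEOUT flag
// from OS.h):
//
//	status_t error = mutex_lock_with_timeout(&sExampleLock,
//		B_RELATIVE_TIMEOUT, 1000000);
//		// gives up with a timeout error after at most one second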


static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
	if (atomic_add(&lock->count, 1) < -1)
#endif
		_mutex_unlock(lock);
}


static inline void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
	lock->holder = thread;
#endif
}


static inline void
recursive_lock_transfer_lock(recursive_lock* lock, thread_id thread)
{
	if (lock->recursion != 1)
		panic("invalid recursion level for lock transfer!");

#if KDEBUG
	lock->lock.holder = thread;
#else
	lock->holder = thread;
#endif
}


extern void lock_debug_init();

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_LOCK_H */