prex: make MAX_INHERIT work
[prex.git] / sys/sync/mutex.c
blob 2d0fd649b60ea6b5f90e215132b59139be236073
/*-
 * Copyright (c) 2005-2007, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * mutex.c - mutual exclusion service.
 */

/*
 * A mutex is used to protect resources that can not be shared. A
 * thread calls mutex_lock() to ensure that a global resource is not
 * accessed by another thread at the same time. A mutex is effective
 * only among threads belonging to the same task.
 *
 * Prex changes thread priorities to prevent priority inversion.
 *
 * <Priority inheritance>
 *   The priority is changed under the following conditions:
 *
 *   1. When the current thread can not lock the mutex and the mutex
 *      owner has a lower priority than the current thread, the
 *      priority of the mutex owner is boosted to the priority of the
 *      current thread. If that owner is itself waiting for another
 *      mutex, the related mutexes are processed in the same way.
 *
 *   2. When the current thread unlocks a mutex and its priority has
 *      been inherited, its priority is recomputed: it is set to the
 *      highest priority among the threads still waiting for mutexes
 *      held by the current thread.
 *
 *   3. When a thread's priority is changed by a user request, the
 *      inherited priority is recomputed as well.
 *
 * <Limitation>
 *
 *   1. If the priority is changed by a user request, the priority
 *      recomputation is done only when the new priority is higher
 *      than the old priority. The inherited priority is reset to the
 *      base priority when the mutex is unlocked.
 *
 *   2. Even if a thread is killed while waiting for a mutex, the
 *      inherited priority of the related mutex owner is not adjusted.
 *
 * <Important>
 *   Recursive locking is supported: a thread may lock the same mutex
 *   more than once, and must unlock it the same number of times
 *   (see lock_count below).
 */
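
/*
 * Illustrative only (not part of the kernel build): a rough sketch of
 * how a user task might drive this service through the system call
 * stubs. The object name "m" is hypothetical.
 *
 *	mutex_t m;
 *
 *	mutex_init(&m);
 *	mutex_lock(&m);
 *	... access the shared resource ...
 *	mutex_unlock(&m);
 *	mutex_destroy(&m);
 *
 * As noted above, the mutex is usable only among threads that belong
 * to the task that created it.
 */
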
#include <kernel.h>
#include <list.h>
#include <event.h>
#include <sched.h>
#include <kmem.h>
#include <thread.h>
#include <task.h>
#include <sync.h>

#define MAX_INHERIT	10	/* Max mutex count to inherit priority */

/* Forward declarations */
static int  prio_inherit(thread_t th);
static void prio_uninherit(thread_t th);
/*
 * Initialize a mutex.
 *
 * If an initialized mutex is reinitialized, undefined
 * behavior results.
 */
__syscall int mutex_init(mutex_t *mtx)
{
	mutex_t m;

	if ((m = kmem_alloc(sizeof(struct mutex))) == NULL)
		return ENOMEM;

	event_init(&m->event, "mutex");
	m->task = cur_task();
	m->owner = NULL;
	m->prio = MIN_PRIO;
	m->magic = MUTEX_MAGIC;

	if (umem_copyout(&m, mtx, sizeof(mutex_t))) {
		kmem_free(m);
		return EFAULT;
	}
	return 0;
}

/*
 * Destroy the specified mutex.
 * The mutex must be in the unlocked state, otherwise this routine
 * fails with EBUSY.
 */
__syscall int mutex_destroy(mutex_t *mtx)
{
	mutex_t m;

	sched_lock();
	if (umem_copyin(mtx, &m, sizeof(mutex_t))) {
		sched_unlock();
		return EFAULT;
	}
	if (!mutex_valid(m)) {
		sched_unlock();
		return EINVAL;
	}
	if (m->owner || event_waiting(&m->event)) {
		sched_unlock();
		return EBUSY;
	}
	m->magic = 0;
	kmem_free(m);
	sched_unlock();
	return 0;
}

/*
 * Copy a mutex from user space.
 * If it is not yet initialized, create a new mutex.
 *
 * @um: pointer to the mutex in user space.
 * @km: pointer to the mutex in kernel space.
 */
static int mutex_copyin(mutex_t *um, mutex_t *km)
{
	mutex_t m;
	int err;

	if (umem_copyin(um, &m, sizeof(mutex_t)))
		return EFAULT;

	if (m == MUTEX_INITIALIZER) {
		/*
		 * Allocate a new mutex, then re-read the handle that
		 * mutex_init() just wrote back to user space.
		 */
		if ((err = mutex_init(um)))
			return err;
		umem_copyin(um, &m, sizeof(mutex_t));
	} else {
		if (!mutex_valid(m))
			return EINVAL;
	}
	*km = m;
	return 0;
}

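
/*
 * Note (sketch, assuming MUTEX_INITIALIZER is also visible to user
 * code): a user mutex whose value is MUTEX_INITIALIZER is allocated
 * lazily by mutex_copyin() on first use, so the explicit mutex_init()
 * call can be omitted. The object name "m" is hypothetical.
 *
 *	static mutex_t m = MUTEX_INITIALIZER;
 *
 *	mutex_lock(&m);		lazily allocates the kernel mutex
 *	mutex_unlock(&m);
 */
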
/*
 * Lock a mutex.
 *
 * The current thread is blocked if the mutex has already been locked.
 * If the current thread receives an exception while waiting for the
 * mutex, this routine returns EINTR so that the exception handler can
 * be invoked. However, a POSIX thread library assumes this function
 * does not return EINTR, so the system call stub routine in the
 * library must call it again when it gets EINTR.
 */
__syscall int mutex_lock(mutex_t *mtx)
{
	mutex_t m;
	int result, err;

	sched_lock();
	if ((err = mutex_copyin(mtx, &m))) {
		sched_unlock();
		return err;
	}

	if (m->owner == cur_thread) {
		/*
		 * Recursive lock
		 */
		m->lock_count++;
	} else {
		/*
		 * Check whether the target mutex is locked. If the
		 * mutex is not locked, this routine returns immediately.
		 */
		if (m->owner == NULL)
			m->prio = cur_thread->prio;
		else {
			/*
			 * Wait for the mutex.
			 */
			cur_thread->wait_mutex = m;
			if ((err = prio_inherit(cur_thread))) {
				sched_unlock();
				return err;
			}
			result = sched_sleep(&m->event);
			cur_thread->wait_mutex = NULL;
			if (result == SLP_INTR) {
				sched_unlock();
				return EINTR;
			}
		}
		m->owner = cur_thread;
		m->lock_count = 1;
		list_insert(&cur_thread->mutexes, &m->link);
	}
	sched_unlock();
	return 0;
}

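
/*
 * Sketch of the retry loop expected from the user-level stub (see the
 * EINTR note above). This is an assumption about the library side,
 * not kernel code; "m" is a hypothetical user mutex.
 *
 *	int err;
 *
 *	do {
 *		err = mutex_lock(&m);
 *	} while (err == EINTR);
 */
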
/*
 * Try to lock a mutex without blocking.
 */
__syscall int mutex_trylock(mutex_t *mtx)
{
	mutex_t m;
	int err;

	sched_lock();
	if ((err = mutex_copyin(mtx, &m))) {
		sched_unlock();
		return err;
	}
	if (m->owner == cur_thread) {
		/* Recursive lock */
		m->lock_count++;
		sched_unlock();
		return 0;
	}
	if (m->owner != NULL) {
		sched_unlock();
		return EBUSY;
	}
	m->owner = cur_thread;
	m->lock_count = 1;
	list_insert(&cur_thread->mutexes, &m->link);

	sched_unlock();
	return 0;
}

/*
 * Unlock a mutex.
 * The caller must be the current owner of the mutex.
 */
__syscall int mutex_unlock(mutex_t *mtx)
{
	mutex_t m;
	int err;

	sched_lock();
	if ((err = mutex_copyin(mtx, &m))) {
		sched_unlock();
		return err;
	}
	if (m->owner != cur_thread || m->lock_count <= 0) {
		sched_unlock();
		return EPERM;
	}
	if (--m->lock_count == 0) {
		list_remove(&m->link);
		prio_uninherit(cur_thread);

		/*
		 * Change the mutex owner, and make the next
		 * owner runnable if it exists.
		 */
		m->owner = sched_wakeone(&m->event);
		if (m->owner)
			m->owner->wait_mutex = NULL;

		m->prio = m->owner ? m->owner->prio : MIN_PRIO;
	}
	sched_unlock();
	return 0;
}

/*
 * Clean up mutexes.
 * This is called with the scheduler locked when a thread is
 * terminated.
 *
 * If a thread terminates while holding a mutex, the threads waiting
 * on that mutex would keep waiting forever. So, every mutex locked by
 * the terminated thread must be unlocked here.
 *
 * Even if the terminated thread was itself waiting for some mutex,
 * the inherited priority of that mutex's owner is not adjusted.
 */
void mutex_cleanup(thread_t th)
{
	list_t head;
	mutex_t mtx;
	thread_t owner;

	/*
	 * Process all mutexes locked by the thread. Entries are taken
	 * from the head of the list because each one is removed (and
	 * possibly handed over to a new owner) inside the loop.
	 */
	head = &th->mutexes;
	while (!list_empty(head)) {
		/*
		 * Release the locked mutex.
		 */
		mtx = list_entry(list_first(head), struct mutex, link);
		mtx->lock_count = 0;
		list_remove(&mtx->link);

		/*
		 * Change the mutex owner if another thread is
		 * waiting for it.
		 */
		owner = sched_wakeone(&mtx->event);
		if (owner) {
			owner->wait_mutex = NULL;
			mtx->lock_count = 1;
			list_insert(&owner->mutexes, &mtx->link);
		}
		mtx->owner = owner;
	}
}

/*
 * This is called with the scheduler locked, before the thread
 * priority is changed by a user request.
 */
void mutex_setprio(thread_t th, int prio)
{
	if (th->wait_mutex && prio < th->prio)
		prio_inherit(th);
}

/*
 * Inherit priority.
 *
 * @waiter: thread that is about to wait for a mutex.
 *
 * A higher priority thread should not have to wait for a lower
 * priority thread. So, the priority of the mutex owner that blocks
 * the specified thread is raised. If that owner is itself waiting for
 * another mutex, the chain is followed, for at most MAX_INHERIT
 * mutexes.
 *
 * Returns EDEADLK if a deadlock condition is found.
 */
static int prio_inherit(thread_t waiter)
{
	mutex_t mtx = waiter->wait_mutex;
	thread_t owner;
	int count = 0;

	do {
		owner = mtx->owner;

		/*
		 * If the owner of the related mutex is already waiting
		 * for the "waiter" thread, it causes a deadlock.
		 */
		if (owner == waiter) {
			printk("Detect deadlock! mutex=%x owner=%x waiter=%x\n",
			       mtx, owner, waiter);
			return EDEADLK;
		}
		/*
		 * If the priority of the mutex owner is lower than the
		 * "waiter" thread's priority, boost it.
		 */
		if (owner->prio > waiter->prio) {
			sched_setprio(owner, owner->base_prio, waiter->prio);
			mtx->prio = waiter->prio;
		}
		/*
		 * If the mutex owner is waiting for another mutex,
		 * that mutex is processed as well.
		 */
		mtx = (mutex_t)owner->wait_mutex;

		if (++count >= MAX_INHERIT)
			break;	/* REVISIT: this should be an error */

	} while (mtx != NULL);
	return 0;
}

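
/*
 * Worked example of the inheritance chain (a smaller prio value means
 * a higher priority; thread names and numbers are hypothetical):
 *
 *   - Thread B (prio 180) owns mutex M2.
 *   - Thread A (prio 200) owns mutex M1 and is waiting for M2.
 *   - Thread C (prio 100) calls mutex_lock() on M1.
 *
 * prio_inherit(C) first boosts A to priority 100, since A owns the
 * mutex that blocks C, then follows A->wait_mutex to M2 and boosts B
 * to 100 as well. The walk stops when an owner is not waiting for any
 * mutex, or after MAX_INHERIT steps.
 */
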
/*
 * Un-inherit priority.
 *
 * The priority of the specified thread is reset to its base priority.
 * However, if the thread still holds other mutexes and a higher
 * priority thread is waiting for one of them, the priority is kept at
 * that level.
 */
static void prio_uninherit(thread_t th)
{
	int top_prio;
	list_t head, n;
	mutex_t mtx;

	/* Check if the priority has been inherited. */
	if (th->prio == th->base_prio)
		return;

	top_prio = th->base_prio;

	/*
	 * Find the highest priority among the threads waiting for this
	 * thread. This is done by checking all mutexes that the thread
	 * still holds.
	 */
	head = &th->mutexes;
	for (n = list_first(head); n != head; n = list_next(n)) {
		mtx = list_entry(n, struct mutex, link);
		if (mtx->prio < top_prio)
			top_prio = mtx->prio;
	}
	sched_setprio(th, th->base_prio, top_prio);
}
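
/*
 * Worked example of un-inheritance (hypothetical numbers): thread T
 * has base_prio 200 and holds M1 (M1->prio 100, boosted by a prio-100
 * waiter) and M2 (M2->prio 150, boosted by a prio-150 waiter), so T
 * currently runs at 100. When T unlocks M1, mutex_unlock() removes M1
 * from T's list and calls prio_uninherit(T), which scans the remaining
 * held mutexes, finds top_prio 150 via M2, and sets T's priority to
 * 150 rather than all the way back to 200.
 */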