/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bits 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner	bit1	bit0
 * NULL		0	0	lock is free (fast acquire possible)
 * NULL		0	1	invalid state
 * NULL		1	0	Transitional state*
 * NULL		1	1	invalid state
 * taskpointer	0	0	lock is held (fast release possible)
 * taskpointer	0	1	task is pending owner
 * taskpointer	1	0	lock is held and has waiters
 * taskpointer	1	1	task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock
 * which puts the woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 and 1 of lock->owner are 0.
 *
 * (*) There is a small window where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set this
 * bit before looking at the lock, hence the reason this is a transitional
 * state.
 */
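/*
 * Illustrative sketch, not part of the original file: with the encoding
 * above and the RT_MUTEX_OWNER_PENDING / RT_MUTEX_HAS_WAITERS masks from
 * rtmutex_common.h occupying bits 0 and 1, decoding lock->owner comes down
 * to something like:
 *
 *	unsigned long val = (unsigned long)lock->owner;
 *	struct task_struct *owner = (struct task_struct *)(val & ~3UL);
 *	int pending = !!(val & RT_MUTEX_OWNER_PENDING);
 *	int has_waiters = !!(val & RT_MUTEX_HAS_WAITERS);
 *
 * which is essentially what the rt_mutex_owner() and
 * rt_mutex_owner_pending() helpers used below provide.
 */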
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, top_task->pid);
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task_struct() before!
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owner's
	 * pi_waiters list. Remove it and readjust the pending owner's
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owner's pi_waiters queue. So
	 * we have to enqueue this waiter into the
	 * current->pi_waiters list. This covers the case
	 * where current is boosted because it holds another
	 * lock and gets unboosted because the booster is
	 * interrupted, so we would delay a waiter with higher
	 * priority than current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}
/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int boost = 0, res;

	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	if (!boost)
		return 0;

	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 current);

	spin_lock(&lock->wait_lock);

	return res;
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents that, in case the pending owner gets unboosted, a
	 * waiter with higher priority than pending-owner->normal_prio
	 * is blocked on the unboosted (pending) owner.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}
/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int boost = 0;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on) {
			boost = 1;
			/* gets dropped in rt_mutex_adjust_prio_chain()! */
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!boost)
		return;

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	spin_lock(&lock->wait_lock);
}
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task))
				continue;

			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
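/*
 * A minimal usage sketch, for illustration only; my_lock and my_data are
 * hypothetical, DEFINE_RT_MUTEX() comes from linux/rtmutex.h:
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *	static int my_data;
 *
 *	static void my_update(int val)
 *	{
 *		rt_mutex_lock(&my_lock);
 *		my_data = val;
 *		rt_mutex_unlock(&my_lock);
 *	}
 *
 * While a lower priority task holds my_lock, it is boosted to the priority
 * of the highest priority waiter until it releases the lock again.
 */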
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock: the rt_mutex to be locked
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout;
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock: the rt_mutex to be locked
 * @timeout: timeout structure or NULL (no timeout)
 * @detect_deadlock: deadlock detection on/off
 *
 * Returns:
 *  0 on success
 * -EINTR when interrupted by a signal
 * -ETIMEDOUT when the timeout expired
 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
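/*
 * For illustration only, a rough sketch of a caller-supplied timeout,
 * assuming the hrtimer_sleeper setup used by the PI-futex code of this
 * kernel generation; my_lock is hypothetical. rt_mutex_slowlock() arms the
 * timer with HRTIMER_ABS, so the expiry time must be absolute:
 *
 *	struct hrtimer_sleeper timeout;
 *	int ret;
 *
 *	hrtimer_init(&timeout.timer, CLOCK_MONOTONIC, HRTIMER_ABS);
 *	hrtimer_init_sleeper(&timeout, current);
 *	timeout.timer.expires = ktime_add(ktime_get(), ktime_set(1, 0));
 *
 *	ret = rt_mutex_timed_lock(&my_lock, &timeout, 0);
 *
 * This returns 0 once the lock is acquired, or -ETIMEDOUT when the one
 * second budget above expires first.
 */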
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
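/*
 * For illustration only (struct my_obj is hypothetical): dynamically
 * allocated objects typically go through the rt_mutex_init() wrapper in
 * linux/rtmutex.h, which supplies a lock name for the debug case:
 *
 *	struct my_obj {
 *		struct rt_mutex lock;
 *	};
 *
 *	struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (obj)
 *		rt_mutex_init(&obj->lock);
 */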
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock: the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}