/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

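/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a mutex is normally defined statically with DEFINE_MUTEX() or
 * initialized at runtime via mutex_init(); 'my_mutex', 'my_dev' and
 * 'my_dev_setup' are hypothetical names.
 *
 *	static DEFINE_MUTEX(my_mutex);		// static definition
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);		// runtime initialization
 *		return 0;
 *	}
 */
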
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);

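/*
 * For reference (editor's note, summarizing the lock->count convention
 * used by <linux/mutex.h> in this kernel generation): count == 1 means
 * unlocked, count == 0 means locked with no waiters, and a negative
 * count means locked with possible waiters queued.
 */
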
/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline fastcall __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);

static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

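/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the canonical lock/unlock pairing around a hypothetical shared
 * counter; 'my_mutex' and 'shared_count' are made-up names.
 *
 *	static DEFINE_MUTEX(my_mutex);
 *	static int shared_count;
 *
 *	static void bump_shared_count(void)
 *	{
 *		mutex_lock(&my_mutex);		// may sleep; not for IRQ context
 *		shared_count++;
 *		mutex_unlock(&my_mutex);	// must be the same task
 *	}
 */
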
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	debug_mutex_add_waiter(lock, &waiter, task->thread_info);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
					signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task->thread_info);
			mutex_release(&lock->dep_map, 1, _RET_IP_);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task->thread_info);
	debug_mutex_set_owner(lock, task->thread_info);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}

static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
#endif

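/*
 * Example (editor's illustrative sketch, not part of the original file):
 * mutex_lock_nested() tells lockdep that taking two locks of the same
 * lock class in a fixed order is intentional; 'struct my_node' and its
 * 'lock' member are hypothetical, SINGLE_DEPTH_NESTING comes from
 * <linux/lockdep.h>.
 *
 *	static void lock_parent_then_child(struct my_node *parent,
 *					   struct my_node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		// ... work on both nodes ...
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */
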
/*
 * Release the lock, slowpath:
 */
static fastcall inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

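/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the return value must be checked, since a pending signal makes the
 * acquisition fail with -EINTR and the lock is then NOT held;
 * 'my_mutex' is a made-up lock.
 *
 *	static int do_work_interruptible(void)
 *	{
 *		if (mutex_lock_interruptible(&my_mutex))
 *			return -EINTR;	// interrupted, lock NOT held
 *		// ... critical section ...
 *		mutex_unlock(&my_mutex);
 *		return 0;
 *	}
 */
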
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
}

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the opposite of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
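
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * because of the spin_trylock()-style convention, success is the
 * nonzero case; 'my_mutex' is a made-up lock.
 *
 *	static int try_fast_path(void)
 *	{
 *		if (!mutex_trylock(&my_mutex))
 *			return 0;	// contended: lock NOT held, bail out
 *		// ... critical section ...
 *		mutex_unlock(&my_mutex);
 *		return 1;
 *	}
 */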