/*
 * (C) Copyright 2007-2011 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * This file is released under the GPLv2. See the COPYING file for more
 * details.
 */
15 #include <interrupt.h>
20 spinlock_t queue_lock
;
21 struct list_head queue
;
22 struct lock_class
*lclass
;
/*
 * Static initializer: defines mutex "name" in the unlocked state
 * (state == 1), with an empty wait queue and ldep class "lc".
 * Keep in sync with mutex_init().
 */
#define UNLOCKED_MUTEX(name, lc) \
	mutex_t name = { \
		.state = ATOMIC_INIT(1), \
		.queue = LIST_HEAD_INIT(name.queue), \
		.queue_lock = SPIN_LOCK_UNLOCKED, \
		.lclass = (lc), \
	}
33 static inline void mutex_init(mutex_t
*lock
, struct lock_class
*lc
)
35 atomic_set(&lock
->state
, 1);
36 INIT_LIST_HEAD(&lock
->queue
);
37 lock
->queue_lock
= SPIN_LOCK_UNLOCKED
;
41 extern void __mutex_lock_slow(mutex_t
*lock
);
43 static inline void __mutex_lock(mutex_t
*lock
, char *lname
)
46 * if we are not interruptable, we shouldn't call any functions that
47 * may sleep - e.g., mutex_lock
49 BUG_ON(!interruptable());
51 ldep_lock(lock
, lock
->lclass
, lname
);
53 if (unlikely(atomic_add_unless(&lock
->state
, -1, 0) == 0))
54 __mutex_lock_slow(lock
); /* the slow-path */
/* lock "l"; #l supplies the lock's variable name for ldep reporting */
#define mutex_lock(l)	__mutex_lock((l), #l)
59 static inline void __mutex_unlock(mutex_t
*lock
, char *lname
)
63 ldep_unlock(lock
, lname
);
65 spin_lock(&lock
->queue_lock
);
67 if (likely(list_empty(&lock
->queue
))) {
68 /* no one is waiting on the queue */
69 atomic_inc(&lock
->state
);
70 spin_unlock(&lock
->queue_lock
);
75 * someone is waiting on the queue, let's dequeue them & make them
78 task
= list_first_entry(&lock
->queue
, struct task
, blocked_list
);
79 list_del(&task
->blocked_list
);
80 spin_unlock(&lock
->queue_lock
);
/* unlock "l"; #l supplies the lock's variable name for ldep reporting */
#define mutex_unlock(l)	__mutex_unlock((l), #l)