/*
 * IA-64 semaphore implementation (derived from x86 version).
 *
 * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Semaphores are implemented using a two-way counter: The "count"
 * variable is decremented for each process that tries to acquire the
 * semaphore, while the "sleepers" variable is a count of such
 * acquirers sleeping in the wait queue.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently
 * test if they need to do any extra work (up needs to do something
 * only if count was negative before the increment operation).
 *
 * "sleepers" and the contention routine ordering is protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is contention
 * on the lock, and as such all this is the "non-critical" part of the
 * whole semaphore business. The critical part is the inline stuff in
 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
 */

#include <linux/sched.h>
#include <linux/init.h>

#include <asm/errno.h>
#include <asm/semaphore.h>
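
/*
 * For orientation, a rough sketch of the inline fast paths that pair
 * with the slow-path routines in this file. Illustrative only: the
 * real down()/up() inlines live in <asm/semaphore.h> and are
 * arch-tuned, so the exact code below is an assumption, not the
 * actual implementation.
 *
 *	static inline void down (struct semaphore *sem)
 *	{
 *		// count went negative => contended, enter __down()
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down(sem);
 *	}
 *
 *	static inline void up (struct semaphore *sem)
 *	{
 *		// count was negative before the increment => wake a sleeper
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);
 *	}
 */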

/*
 * Logic:
 *  - Only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - When we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
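
/*
 * A short worked example of the arithmetic at that boundary, assuming
 * a free semaphore initialized with count = 1 and two tasks A and B:
 *
 *	- A calls down():  count 1 -> 0, fast path, A holds the semaphore.
 *	- B calls down():  count 0 -> -1, slow path; __down() bumps
 *	  sleepers to 1, and atomic_add_negative(1 - 1 = 0, &count)
 *	  leaves count at -1 (still negative), so B sleeps.
 *	- A calls up():    count -1 -> 0, __up() wakes B; B re-runs the
 *	  loop, again adds sleepers - 1 = 0, now sees a non-negative
 *	  count, zeroes sleepers, and owns the semaphore.
 */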

void
__up (struct semaphore *sem)
{
	wake_up(&sem->wait);
}

void __sched __down (struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
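
/*
 * Typical caller pattern for the uninterruptible path (illustrative;
 * "my_dev" is a hypothetical driver structure, not part of this file):
 *
 *	down(&my_dev->sem);	// may sleep in __down() above
 *	// ... critical section ...
 *	up(&my_dev->sem);	// __up() wakes the next exclusive waiter
 */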

int __sched __down_interruptible (struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}
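
/*
 * Typical caller pattern for the interruptible path (illustrative;
 * "my_dev" is hypothetical). The non-zero return from the slow path
 * above (-EINTR) must be handled, conventionally by restarting the
 * syscall:
 *
 *	if (down_interruptible(&my_dev->sem))
 *		return -ERESTARTSYS;	// a signal interrupted the sleep
 *	// ... critical section ...
 *	up(&my_dev->sem);
 */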

/*
 * Trylock failed - make sure we correct for having decremented the
 * count.
 */
int
__down_trylock (struct semaphore *sem)
{
	unsigned long flags;
	int sleepers;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;
}
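
/*
 * Typical caller pattern for the trylock path (illustrative; "my_dev"
 * is hypothetical). down_trylock() returns 0 on success and non-zero
 * when the semaphore is contended, in which case the caller must not
 * touch the protected data:
 *
 *	if (down_trylock(&my_dev->sem))
 *		return -EBUSY;		// contended; fail instead of sleeping
 *	// ... critical section ...
 *	up(&my_dev->sem);
 */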