wrt350n-kernel.git: arch/alpha/kernel/semaphore.c
/*
 * Alpha semaphore implementation.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999, 2000 Richard Henderson
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
/*
 * This is basically the PPC semaphore scheme ported to use
 * the Alpha ll/sc sequences, so see the PPC code for
 * credits.
 */
/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	long old_count, tmp = 0;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%2\n"	/* load-locked the old count */
	"	cmovgt	%0,%0,%1\n"	/* tmp = old_count if > 0, else 0 */
	"	addl	%1,%3,%1\n"	/* tmp = max(old_count, 0) + incr */
	"	stl_c	%1,%2\n"	/* store-conditional the new count */
	"	beq	%1,2f\n"	/* lost the reservation - retry */
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "Ir" (incr), "1" (tmp), "m" (sem->count));

	return old_count;
}
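/*
 * Illustrative sketch (not from the original source): how the down path
 * below interprets the return value.  A positive old count means the
 * caller took one of the available units; zero or negative means the
 * count was clamped to -1 and the caller must sleep.
 *
 *	if (__sem_update_count(sem, -1) > 0)
 *		... we now hold the semaphore ...
 *	else
 *		... count is now -1; go to sleep on sem->wait ...
 */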
/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
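/*
 * Caller-side sketch of the two forms described above (illustrative only,
 * not part of the original file; "sem" stands for any initialized
 * struct semaphore):
 *
 *	down(&sem);			sleeps uninterruptibly until acquired
 *	...critical section...
 *	up(&sem);
 *
 *	if (down_interruptible(&sem))	fails with -EINTR if a signal
 *		return -EINTR;		arrives before acquisition
 *	...critical section...
 *	up(&sem);
 */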
void __sched
__down_failed(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, task_pid_nr(tsk), sem);
#endif

	tsk->state = TASK_UNINTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down acquired(%p)\n",
	       tsk->comm, task_pid_nr(tsk), sem);
#endif
}
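/*
 * Count protocol implied by the comments above (summary added for
 * clarity, not in the original file):
 *
 *	count > 0	semaphore free; down() takes one unit
 *	count == 0	held, nobody waiting
 *	count < 0	held, with sleepers (the slow path clamps it to -1)
 *
 * A waiter that does acquire the semaphore forwards one wake_up(), so
 * the next sleeper can either acquire it in turn or restore the -1
 * marker before going back to sleep.
 */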
int __sched
__down_failed_interruptible(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	long ret = 0;

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, task_pid_nr(tsk), sem);
#endif

	tsk->state = TASK_INTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			ret = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down %s(%p)\n",
	       current->comm, task_pid_nr(current),
	       (ret < 0 ? "interrupted" : "acquired"), sem);
#endif
	return ret;
}
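/*
 * Worked example of the signal path above (added for clarity): if this
 * task was the only sleeper, count is -1 when the signal arrives, and
 * __sem_update_count(sem, 0) computes max(-1, 0) + 0 = 0, clearing the
 * "sleepers" marker before we leave the wait queue.
 */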
void
__up_wakeup(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
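/*
 * Worked example (added for clarity): with one task asleep, count is -1.
 * up() increments it to 0, sees the result is <= 0 and falls into this
 * slow path; __sem_update_count(sem, 1) then computes max(0, 0) + 1 = 1,
 * and the wake_up() lets the sleeper retry __sem_update_count(sem, -1)
 * and acquire the semaphore.
 */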
void __sched
down(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, task_pid_nr(current), sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__down(sem);
}
int __sched
down_interruptible(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, task_pid_nr(current), sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	return __down_interruptible(sem);
}
int
down_trylock(struct semaphore *sem)
{
	int ret;

#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	ret = __down_trylock(sem);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down_trylock %s from %p\n",
	       current->comm, task_pid_nr(current),
	       ret ? "failed" : "acquired",
	       __builtin_return_address(0));
#endif

	return ret;
}
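/*
 * Usage sketch (illustrative, not from the original file; "sem" is any
 * initialized struct semaphore): down_trylock() never sleeps and returns
 * nonzero when the semaphore could not be taken, as the debug message
 * above reflects.
 *
 *	if (down_trylock(&sem))
 *		return -EBUSY;		could not get it without sleeping
 *	...critical section...
 *	up(&sem);
 */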
void
up(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): up(%p) <count=%d> from %p\n",
	       current->comm, task_pid_nr(current), sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__up(sem);
}