/*
 * arch/v850/kernel/semaphore.c -- Semaphore support
 *
 *  Copyright (C) 1998-2000  IBM Corporation
 *  Copyright (C) 1999  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
 *    Author(s): Martin Schwidefsky
 * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
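
/*
 * For orientation only (this sketch is not part of the original file,
 * and the real inlines in <asm/semaphore.h> may differ in detail):
 * the fast paths are roughly
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down(sem);	// contended: take the slow path below
 *		// otherwise the decrement stayed non-negative: we own it
 *	}
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);	// count was negative: someone is sleeping
 *	}
 *
 * so the routines in this file only ever run on contention.
 */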
/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
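
/*
 * Illustrative walkthrough (added for clarity, not in the original
 * file): assume the semaphore starts at count = 1, task A holds it
 * (count = 0), and task B now calls down().  B's decrement makes
 * count = -1, so B enters __down(), bumps sleepers to 1, and
 * atomic_add_negative(sleepers - 1 = 0, &count) leaves count at -1,
 * still negative, so B sleeps.  When A calls up(), count becomes 0
 * and __up() wakes B; B re-runs the loop, adds 0 again, now sees a
 * non-negative count, clears sleepers, and owns the semaphore.
 */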
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
/* Protects "sleepers" and orders the contention routines below.  */
static DEFINE_SPINLOCK(semaphore_lock);
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
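
/*
 * Typical usage, for illustration only (the names below are made up,
 * not from this file):
 *
 *	static DECLARE_MUTEX(my_sem);	// semaphore initialized to 1
 *
 *	down(&my_sem);		// sleeps in __down() above on contention
 *	... critical section ...
 *	up(&my_sem);		// ends up in __up() if someone is waiting
 */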
int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
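
/*
 * Note (added, not in the original file): __down_interruptible()
 * returns 0 once the semaphore is acquired and -EINTR if a signal was
 * caught first.  A typical caller (my_sem is a made-up name) is
 *
 *	if (down_interruptible(&my_sem))
 *		return -ERESTARTSYS;
 *	...
 *	up(&my_sem);
 */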
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore *sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
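
/*
 * For illustration only (the real inline in <asm/semaphore.h> may
 * differ in detail): the caller is roughly
 *
 *	static inline int down_trylock(struct semaphore *sem)
 *	{
 *		return atomic_dec_return(&sem->count) < 0
 *			? __down_trylock(sem)	// contended: undo the decrement
 *			: 0;			// got it
 *	}
 *
 * so __down_trylock() returning 1 means "trylock failed", and its only
 * job is to put count (and sleepers) back the way they were, waking a
 * waiter if the correction leaves the count non-negative.
 */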