/*
 * linux/arch/m32r/semaphore.c
 *
 * M32R semaphore implementation.
 *
 * Copyright (c) 2002 - 2004  Hitoshi Yamamoto
 *
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up()
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering are protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business.  The
 * critical part is the inline stuff in <asm/semaphore.h>,
 * where we want to avoid any extra jumps and calls.
 */
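
/*
 * Illustrative sketch (not part of the original file): how callers
 * are expected to reach this slow path.  The inline fast path in
 * <asm/semaphore.h> handles the uncontended case by itself and only
 * calls __down()/__up() on contention.  The identifiers below are
 * hypothetical example names.
 */
#if 0	/* example only */
static DECLARE_MUTEX(example_sem);	/* semaphore with count = 1 */

static void example_critical_section(void)
{
	down(&example_sem);	/* uncontended: one atomic dec, no call here */
	/* ... exclusive work; if we slept, __down() brought us here ... */
	up(&example_sem);	/* __up() runs only if count went negative */
}
#endif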
/*
 * Logic:
 *  - only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up;
 *  - when we go from a non-negative count to a negative one, we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */
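
/*
 * Worked example of the boundaries (added for clarity; values are
 * illustrative).  For a semaphore initialized to 1:
 *
 *	count == 1	free
 *	count == 0	held, nobody waiting
 *	count <  0	held, with waiters in the slow path
 *
 * So a down() that crosses 0 -> -1 is the "enter contention"
 * boundary, and an up() that crosses -1 -> 0 is the "wake
 * somebody" boundary described above.
 */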
asmlinkage void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
asmlinkage void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
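
/*
 * Worked example of the sleeper arithmetic above (illustrative,
 * not from the original file).  Suppose A holds the semaphore
 * (count = 0) and B, then C, call down():
 *
 *	B: fast path drops count to -1; __down() sets sleepers = 1,
 *	   atomic_add_negative(1 - 1, ...) leaves count at -1; B sleeps.
 *	C: fast path drops count to -2; sleepers becomes 2,
 *	   atomic_add_negative(2 - 1, ...) folds the extra decrement
 *	   back in: count returns to -1, sleepers = 1; C sleeps.
 *
 * The steady state with any number of waiters is count == -1 and
 * sleepers == 1, and the wake_up_locked() above passes contention
 * on to the next queued task after a winner exits the loop.
 */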
asmlinkage int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}
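
/*
 * Illustrative caller (hypothetical, not in the original file):
 * the -EINTR produced above is what down_interruptible() hands back
 * when a signal aborts the wait; drivers conventionally convert it.
 */
#if 0	/* example only */
static int example_wait_for_resource(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -ERESTARTSYS;	/* signal arrived; let it be handled */
	/* ... resource is ours ... */
	up(sem);
	return 0;
}
#endif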
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
asmlinkage int __down_trylock(struct semaphore *sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;
}
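
/*
 * Illustrative use of the trylock path (hypothetical example, not
 * in the original file): down_trylock() returns 0 when it acquired
 * the semaphore and nonzero when it did not; the count fix-up above
 * only runs in the failure case.
 */
#if 0	/* example only */
static int example_poll_resource(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EBUSY;	/* contended: count corrected, no sleep */
	/* ... got it without sleeping ... */
	up(sem);
	return 0;
}
#endif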