/*
 * linux/arch/m32r/semaphore.c
 *
 * M32R semaphore implementation.
 *
 * Copyright (c) 2002 - 2004 Hitoshi Yamamoto
 *
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering is protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
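
/*
 * For orientation: the fast path lives in <asm/semaphore.h> as
 * hand-written assembly. It behaves roughly like the C sketch
 * below (illustrative only, not the actual inline code):
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down(sem);	// contended: take the slow path
 *	}
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);	// someone is sleeping: wake them
 *	}
 */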
/*
 * Logic:
 *  - Only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - When we go from a non-negative count to a negative one, we must
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */
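
/*
 * A worked trace of the counting (illustrative; count starts at 1,
 * i.e. the semaphore is free):
 *
 *	A: down()	count 1 -> 0, fast path; A holds the semaphore.
 *	B: down()	count 0 -> -1, slow path; __down() sets
 *			sleepers = 1, and atomic_add_negative(
 *			sleepers - 1 = 0, &count) still sees -1,
 *			so B sleeps.
 *	A: up()		count -1 -> 0; __up() wakes B.
 *	B: (retry)	atomic_add_negative(0, &count) now sees 0,
 *			which is non-negative, so B takes the
 *			semaphore and zeroes "sleepers".
 */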
asmlinkage void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
asmlinkage void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
asmlinkage int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
asmlinkage int __down_trylock(struct semaphore *sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;
}
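
/*
 * For comparison, the "single cmpxchg" trylock mentioned above would
 * look roughly like the sketch below. This is illustrative only: it
 * assumes an atomic_cmpxchg() primitive, which is exactly what the
 * 80386 lacks, hence the sleepers-based scheme used here instead.
 *
 *	static inline int down_trylock_cmpxchg(struct semaphore *sem)
 *	{
 *		int old = atomic_read(&sem->count);
 *
 *		while (old > 0) {
 *			int prev = atomic_cmpxchg(&sem->count, old, old - 1);
 *			if (prev == old)
 *				return 0;	// acquired, no sleep needed
 *			old = prev;		// raced with someone; retry
 *		}
 *		return 1;			// contended: give up
 *	}
 */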
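
/*
 * Typical usage of the public API backed by these slow paths
 * (illustrative sketch; "my_sem" is a hypothetical name):
 *
 *	static DECLARE_MUTEX(my_sem);	// semaphore with count = 1
 *
 *	down(&my_sem);			// may sleep uninterruptibly
 *	// ... critical section ...
 *	up(&my_sem);
 *
 *	if (down_interruptible(&my_sem))
 *		return -EINTR;		// interrupted by a signal
 *	// ... critical section ...
 *	up(&my_sem);
 *
 *	if (!down_trylock(&my_sem)) {	// returns 0 when acquired
 *		// ... critical section ...
 *		up(&my_sem);
 *	}
 */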