/*
 * kernel/wait.c
 *
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

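/*
 * Illustrative sketch (not part of this file): the classic open-coded
 * sleep using add_wait_queue()/remove_wait_queue() directly.  The
 * waitqueue head `my_wq` and condition `done` are hypothetical names;
 * the waker sets `done` and calls wake_up(&my_wq).
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&my_wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (done)
 *			break;
 *		schedule();
 *	}
 *	set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&my_wq, &wait);
 *
 * Setting the task state before testing the condition is what closes
 * the race against a concurrent wake_up(); see the note before
 * prepare_to_wait() below, which packages this pattern up.
 */
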
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

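/*
 * Illustrative sketch (not part of this file): the canonical wait loop
 * built on prepare_to_wait()/finish_wait().  The waitqueue head `my_wq`
 * and the wakeup condition `done` are hypothetical names; a waker would
 * set `done` and call wake_up(&my_wq).
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (done)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * Because prepare_to_wait() performs the queue addition and
 * set_current_state() in the order discussed above, the loop either
 * sees `done` set or is guaranteed to be woken by the waker.
 */
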
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		__wake_up_common(q, mode, 1, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);

/*
 * The wake function installed by DEFINE_WAIT(): wake the task and, on
 * success, take the wait descriptor off the queue so that finish_wait()
 * can usually skip taking the queue lock.
 */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the @action callbacks passed to __wait_on_bit() and
 * __wait_on_bit_lock() are permitted to return nonzero codes.
 * A nonzero return code halts waiting and is returned to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

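/*
 * Illustrative sketch (not part of this file): a minimal @action
 * callback and its use through the wait_on_bit() wrapper in
 * <linux/wait.h>, which falls through to out_of_line_wait_on_bit()
 * when the bit is set.  The unsigned long `flags` word, the bit
 * number MY_BIT and my_bit_action() are hypothetical names.
 *
 *	static int my_bit_action(void *word)
 *	{
 *		schedule();
 *		return signal_pending(current) ? -EINTR : 0;
 *	}
 *
 *	if (wait_on_bit(&flags, MY_BIT, my_bit_action, TASK_INTERRUPTIBLE))
 *		return -EINTR;
 *
 * As described above, a nonzero return from the action halts the wait
 * and is propagated back to the caller.
 */
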
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	do {
		int ret;

		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (!test_bit(q->key.bit_nr, q->key.flags))
			continue;
		ret = action(q->key.flags);
		if (!ret)
			continue;
		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
		return ret;
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

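/*
 * Illustrative sketch (not part of this file): using the lock variant
 * as a simple bit lock through the wait_on_bit_lock() wrapper in
 * <linux/wait.h>.  `flags`, MY_LOCK_BIT and my_bit_action() are the
 * same hypothetical names as in the sketch above.
 *
 *	if (!wait_on_bit_lock(&flags, MY_LOCK_BIT, my_bit_action,
 *			      TASK_UNINTERRUPTIBLE)) {
 *		... MY_LOCK_BIT is now set and owned by this task ...
 *	}
 *
 * The matching release side, including the required memory barrier,
 * is sketched after wake_up_bit() below.
 */
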
void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

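/*
 * Illustrative sketch (not part of this file): the release side pairing
 * with the wait_on_bit_lock() sketch above.  The barrier between
 * clearing the bit and waking is required for the reason given in the
 * comment above: wake_up_bit() tests waitqueue_active() internally.
 *
 *	clear_bit(MY_LOCK_BIT, &flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&flags, MY_LOCK_BIT);
 */
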
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	/*
	 * Shift the word address left by log2(BITS_PER_LONG) and fold
	 * the bit number into the freed-up low bits, so that different
	 * bits of the same word hash to different waitqueues in the
	 * zone's wait table.
	 */
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);